Misc cleanup.
Signed-off-by: Tristan Gingold <tgingold@free.fr>
IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
{
- vmx_vcpu_ptc_ga(vcpu, va, ps);
- return IA64_ILLOP_FAULT;
+ return vmx_vcpu_ptc_ga(vcpu, va, ps);
}
/*
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(vmx_debug_vector)
VMX_DBG_FAULT(29)
- VMX_FAULT(29)
+ VMX_REFLECT(29)
END(vmx_debug_vector)
.org vmx_ia64_ivt+0x5a00
u64 vpsr = VCPU(vcpu, vpsr);
vector = vec2off[vec];
- if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
- panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
- }
switch (vec) {
-
+ case 5: // IA64_DATA_NESTED_TLB_VECTOR
+ break;
case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR
+ if (!(vpsr & IA64_PSR_IC))
+ goto nested_fault;
if (vhpt_access_rights_fixup(vcpu, ifa, 0))
return;
break;
case 25: // IA64_DISABLED_FPREG_VECTOR
-
+ if (!(vpsr & IA64_PSR_IC))
+ goto nested_fault;
if (FP_PSR(vcpu) & IA64_PSR_DFH) {
FP_PSR(vcpu) = IA64_PSR_MFH;
if (__ia64_per_cpu_var(fp_owner) != vcpu)
}
break;
-
+
case 32: // IA64_FP_FAULT_VECTOR
+ if (!(vpsr & IA64_PSR_IC))
+ goto nested_fault;
// handle fpswa emulation
// fp fault
status = handle_fpu_swa(1, regs, isr);
break;
case 33: // IA64_FP_TRAP_VECTOR
+ if (!(vpsr & IA64_PSR_IC))
+ goto nested_fault;
//fp trap
status = handle_fpu_swa(0, regs, isr);
if (!status)
return;
}
break;
-
+
+ case 29: // IA64_DEBUG_VECTOR
+ case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
+ case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
+ if (vmx_guest_kernel_mode(regs)
+ && current->domain->debugger_attached) {
+ domain_pause_for_debugger();
+ return;
+ }
+ if (!(vpsr & IA64_PSR_IC))
+ goto nested_fault;
+ break;
+
+ default:
+ if (!(vpsr & IA64_PSR_IC))
+ goto nested_fault;
+ break;
}
VCPU(vcpu,isr)=isr;
VCPU(vcpu,iipa) = regs->cr_iip;
set_ifa_itir_iha(vcpu,ifa,1,1,1);
}
inject_guest_interruption(vcpu, vector);
+ return;
+
+ nested_fault:
+ panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
}
#include <asm/processor.h>
#include <asm/vmx_mm_def.h>
-
+#ifdef CHECK_FAULT
/*
* Return:
* 0: Not reserved indirect registers
return 0;
}
+#endif
/*
* Return:
}
-
+#ifdef CHECK_FAULT
/*
* Return:
* 1: CR reserved fields are not zero
panic ("Unsupported CR");
return 0;
}
+#endif
-
-
+#if 0
/*
* Return:
* 0: Indirect Reg reserved fields are not zero
return 1;
}
-
+#endif
*/
VCPU(vcpu,vpsr) = value &
(~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
- IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
- ));
+ IA64_PSR_ED | IA64_PSR_IA));
if ( !old_psr.i && (value & IA64_PSR_I) ) {
// vpsr.i 0->1
{
u64 val;
- if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
- panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
+ if (vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
+ panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
return vmx_vcpu_set_psr_l(vcpu, val);
}
static IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
{
u64 r3,r2;
- return IA64_NO_FAULT;
#ifdef CHECK_FAULT
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
return IA64_FAULT;
#endif //CHECK_FAULT
}
- return (vmx_vcpu_set_ibr(vcpu,r3,r2));
+ return vmx_vcpu_set_ibr(vcpu,r3,r2);
}
static IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
static IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
{
u64 r3,r1;
+ IA64FAULT res;
#ifdef CHECK_FAULT
if(check_target_register(vcpu, inst.M43.r1)){
set_illegal_op_isr(vcpu);
return IA64_FAULT;
}
#endif //CHECK_FAULT
- r1 = vmx_vcpu_get_dbr(vcpu, r3);
+ /* mov-from-DBR must read the *data* breakpoint register, and the
+ new accessor reports faults via the return value. */
+ res = vmx_vcpu_get_dbr(vcpu, r3, &r1);
+ if (res != IA64_NO_FAULT)
+ return res;
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
u64 r3,r1;
+ IA64FAULT res;
#ifdef CHECK_FAULT
if(check_target_register(vcpu, inst.M43.r1)){
set_illegal_op_isr(vcpu);
return IA64_FAULT;
}
#endif //CHECK_FAULT
- r1 = vmx_vcpu_get_ibr(vcpu, r3);
+ /* mov-from-IBR must read the *instruction* breakpoint register, and
+ the new accessor reports faults via the return value. */
+ res = vmx_vcpu_get_ibr(vcpu, r3, &r1);
+ if (res != IA64_NO_FAULT)
+ return res;
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
break;
case EVENT_VMSW:
printk ("Unimplemented instruction %ld\n", cause);
- status=IA64_FAULT;
+ status=IA64_FAULT;
break;
default:
- panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
+ panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n",
+ cause,regs->cr_iip,regs->cr_ipsr);
break;
};
#if 0
- if (status == IA64_FAULT)
+ if (status != IA64_NO_FAULT)
panic("Emulation failed with cause %d:\n", cause);
#endif
- if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
- vcpu_increment_iip(vcpu);
+ switch (status) {
+ case IA64_RSVDREG_FAULT:
+ set_rsv_reg_field_isr(vcpu);
+ rsv_reg_field(vcpu);
+ break;
+ case IA64_ILLOP_FAULT:
+ set_illegal_op_isr(vcpu);
+ illegal_op(vcpu);
+ break;
+ case IA64_FAULT:
+ /* Registers already set. */
+ break;
+ case IA64_NO_FAULT:
+ if ( cause != EVENT_RFI )
+ vcpu_increment_iip(vcpu);
+ break;
}
+
recover_if_physical_mode(vcpu);
return;
if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
return IA64_ILLOP_FAULT;
} else {
- /* Mask PL0. */
- val &= ~(1UL << 56);
+ if (!VMX_DOMAIN(vcpu)) {
+ /* Mask PL0. */
+ val &= ~(1UL << 56);
+ }
}
if (val != 0)
vcpu->arch.dbg_used |= (1 << reg);
if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
return IA64_ILLOP_FAULT;
} else {
- /* Mask PL0. */
- val &= ~(1UL << 56);
+ if (!VMX_DOMAIN(vcpu)) {
+ /* Mask PL0. */
+ val &= ~(1UL << 56);
+ }
}
if (val != 0)
vcpu->arch.dbg_used |= (1 << (reg + IA64_NUM_DBG_REGS));
#ifdef XEN
# define guest_mode(regs) (ia64_psr(regs)->cpl != 0)
# define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
+# define vmx_guest_kernel_mode(regs) (ia64_psr(regs)->cpl == 0)
#else
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
#endif
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
extern void set_illegal_op_isr (struct vcpu *vcpu);
extern void illegal_op (struct vcpu *vcpu);
+extern void set_rsv_reg_field_isr (struct vcpu *vcpu);
+extern void rsv_reg_field (struct vcpu *vcpu);
extern void vmx_relinquish_guest_resources(struct domain *d);
extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
extern void vmx_die_if_kernel(char *str, struct pt_regs *regs, long err);
static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
{
- // TODO: unimplemented DBRs return a reserved register fault
- // TODO: Should set Logical CPU state, not just physical
- ia64_set_dbr(reg, val);
- return IA64_NO_FAULT;
+ return vcpu_set_dbr(vcpu, reg, val);
}
static inline IA64FAULT vmx_vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
{
- // TODO: unimplemented IBRs return a reserved register fault
- // TODO: Should set Logical CPU state, not just physical
- ia64_set_ibr(reg, val);
- return IA64_NO_FAULT;
+ return vcpu_set_ibr(vcpu, reg, val);
}
-static inline u64 vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg)
+static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 *pval)
{
- // TODO: unimplemented DBRs return a reserved register fault
- return ((u64)ia64_get_dbr(reg));
+ return vcpu_get_dbr(vcpu, reg, pval);
}
-static inline u64 vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg)
+static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 *pval)
{
- // TODO: unimplemented IBRs return a reserved register fault
- return ((u64)ia64_get_ibr(reg));
+ return vcpu_get_ibr(vcpu, reg, pval);
}
/**************************************************************************